Clean up the interface for sharing xen-heap pages with guests.
Author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Wed, 8 Mar 2006 23:45:40 +0000 (00:45 +0100)
Committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Wed, 8 Mar 2006 23:45:40 +0000 (00:45 +0100)
Map trace buffer pages as DOMID_XEN pages.

Signed-off-by: Keir Fraser <keir@xensource.com>
tools/xenmon/xenbaked.c
tools/xentrace/xentrace.c
xen/arch/x86/domain.c
xen/arch/x86/mm.c
xen/arch/x86/x86_32/mm.c
xen/arch/x86/x86_64/mm.c
xen/common/trace.c
xen/include/asm-ia64/mm.h
xen/include/asm-x86/grant_table.h
xen/include/asm-x86/mm.h

index 41b50a5daa7aec2ae312fed312679bba36723b25..1289e474253bbaa4159e28244eb3a6b64dbef010 100644 (file)
@@ -299,7 +299,7 @@ struct t_buf *map_tbufs(unsigned long tbufs_mfn, unsigned int num,
         exit(EXIT_FAILURE);
     }
 
-    tbufs_mapped = xc_map_foreign_range(xc_handle, 0 /* Dom 0 ID */,
+    tbufs_mapped = xc_map_foreign_range(xc_handle, DOMID_XEN,
                                         size * num, PROT_READ | PROT_WRITE,
                                         tbufs_mfn);
 
index 18105a19b7a71e59ef4b3ff1a2cbc0a795b68636..2780e1dbb2249a959b27c2cfcb7bac43e8908aa4 100644 (file)
@@ -144,7 +144,7 @@ struct t_buf *map_tbufs(unsigned long tbufs_mfn, unsigned int num,
         exit(EXIT_FAILURE);
     }
 
-    tbufs_mapped = xc_map_foreign_range(xc_handle, 0 /* Dom 0 ID */,
+    tbufs_mapped = xc_map_foreign_range(xc_handle, DOMID_XEN,
                                         size * num, PROT_READ | PROT_WRITE,
                                         tbufs_mfn);
 
index 745e8a2b5ef3e25a4f8721ce627b7486194425b6..836dc0e7078a1af85cda25a279059d1556ca292d 100644 (file)
@@ -312,7 +312,8 @@ int arch_domain_create(struct domain *d)
             goto fail_nomem;
 
         memset(d->shared_info, 0, PAGE_SIZE);
-        SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
+        share_xen_page_with_guest(
+            virt_to_page(d->shared_info), d, XENSHARE_writable);
     }
 
     return 0;
index 5fb1198edf8bb4e08aff2053cc7c098f738168b0..1994ecb8c9e7b434b0307e48c8a6a52359c1c8cd 100644 (file)
@@ -176,10 +176,9 @@ void __init init_frametable(void)
 
 void arch_init_memory(void)
 {
-    extern void subarch_init_memory(struct domain *);
+    extern void subarch_init_memory(void);
 
     unsigned long i, pfn, rstart_pfn, rend_pfn;
-    struct page_info *page;
 
     memset(percpu_info, 0, sizeof(percpu_info));
 
@@ -189,6 +188,7 @@ void arch_init_memory(void)
      * their domain field set to dom_xen.
      */
     dom_xen = alloc_domain();
+    spin_lock_init(&dom_xen->page_alloc_lock);
     atomic_set(&dom_xen->refcnt, 1);
     dom_xen->domain_id = DOMID_XEN;
 
@@ -198,17 +198,13 @@ void arch_init_memory(void)
      * array. Mappings occur at the priv of the caller.
      */
     dom_io = alloc_domain();
+    spin_lock_init(&dom_io->page_alloc_lock);
     atomic_set(&dom_io->refcnt, 1);
     dom_io->domain_id = DOMID_IO;
 
     /* First 1MB of RAM is historically marked as I/O. */
     for ( i = 0; i < 0x100; i++ )
-    {
-        page = mfn_to_page(i);
-        page->count_info        = PGC_allocated | 1;
-        page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
-        page_set_owner(page, dom_io);
-    }
+        share_xen_page_with_guest(mfn_to_page(i), dom_io, XENSHARE_writable);
  
     /* Any areas not specified as RAM by the e820 map are considered I/O. */
     for ( i = 0, pfn = 0; i < e820.nr_map; i++ )
@@ -221,17 +217,45 @@ void arch_init_memory(void)
         for ( ; pfn < rstart_pfn; pfn++ )
         {
             BUG_ON(!mfn_valid(pfn));
-            page = mfn_to_page(pfn);
-            page->count_info        = PGC_allocated | 1;
-            page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
-            page_set_owner(page, dom_io);
+            share_xen_page_with_guest(
+                mfn_to_page(pfn), dom_io, XENSHARE_writable);
         }
         /* Skip the RAM region. */
         pfn = rend_pfn;
     }
     BUG_ON(pfn != max_page);
 
-    subarch_init_memory(dom_xen);
+    subarch_init_memory();
+}
+
+void share_xen_page_with_guest(
+    struct page_info *page, struct domain *d, int readonly)
+{
+    if ( page_get_owner(page) == d )
+        return;
+
+    spin_lock(&d->page_alloc_lock);
+
+    /* The incremented type count pins as writable or read-only. */
+    page->u.inuse.type_info  = (readonly ? PGT_none : PGT_writable_page);
+    page->u.inuse.type_info |= PGT_validated | 1;
+
+    page_set_owner(page, d);
+    wmb(); /* install valid domain ptr before updating refcnt. */
+    ASSERT(page->count_info == 0);
+    page->count_info |= PGC_allocated | 1;
+
+    if ( unlikely(d->xenheap_pages++ == 0) )
+        get_knownalive_domain(d);
+    list_add_tail(&page->list, &d->xenpage_list);
+
+    spin_unlock(&d->page_alloc_lock);
+}
+
+void share_xen_page_with_privileged_guests(
+    struct page_info *page, int readonly)
+{
+    share_xen_page_with_guest(page, dom_xen, readonly);
 }
 
 void write_ptbase(struct vcpu *v)
index 19dfc0170e90fde75ee6e51eac7a120cf50b5648..47211c9e69a39b24e337f02f901e0da67da85c39 100644 (file)
@@ -144,7 +144,7 @@ void __init zap_low_mappings(l2_pgentry_t *base)
     flush_tlb_all_pge();
 }
 
-void subarch_init_memory(struct domain *dom_xen)
+void subarch_init_memory(void)
 {
     unsigned long m2p_start_mfn;
     unsigned int i, j;
@@ -175,10 +175,7 @@ void subarch_init_memory(struct domain *dom_xen)
         for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
         {
             struct page_info *page = mfn_to_page(m2p_start_mfn + j);
-            page->count_info = PGC_allocated | 1;
-            /* Ensure it's only mapped read-only by domains. */
-            page->u.inuse.type_info = PGT_gdt_page | 1;
-            page_set_owner(page, dom_xen);
+            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
 
index 7e826da8b098ef85e16eb899a18b6dd720b268f8..aa51f434e95694bd8589fb259ea5ec7c70811ba4 100644 (file)
@@ -134,7 +134,7 @@ void __init zap_low_mappings(void)
     flush_tlb_all_pge();
 }
 
-void subarch_init_memory(struct domain *dom_xen)
+void subarch_init_memory(void)
 {
     unsigned long i, v, m2p_start_mfn;
     l3_pgentry_t l3e;
@@ -174,11 +174,7 @@ void subarch_init_memory(struct domain *dom_xen)
         for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         {
             struct page_info *page = mfn_to_page(m2p_start_mfn + i);
-            page->count_info = PGC_allocated | 1;
-            /* gdt to make sure it's only mapped read-only by non-privileged
-               domains. */
-            page->u.inuse.type_info = PGT_gdt_page | 1;
-            page_set_owner(page, dom_xen);
+            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
         }
     }
 }
index d8b2fad1edf31b275455d9b16a362057aeff6506..b0594aa0a877608103d19cc4883dfe68841506d9 100644 (file)
@@ -83,8 +83,9 @@ static int alloc_trace_bufs(void)
 
     /* Share pages so that xentrace can map them. */
     for ( i = 0; i < nr_pages; i++ )
-        SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf + i * PAGE_SIZE), dom0);
-    
+        share_xen_page_with_privileged_guests(
+            virt_to_page(rawbuf) + i, XENSHARE_writable);
+
     for_each_online_cpu ( i )
     {
         buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
index 38fa7633779aa37ddcf0b1950940cdee6e749b41..eea665d8841fef38192af7da19aa52f9abbb073a 100644 (file)
@@ -118,7 +118,8 @@ static inline u32 pickle_domptr(struct domain *_d)
 #define page_set_owner(_p, _d) ((_p)->u.inuse._domain = pickle_domptr(_d))
 
 /* Dummy now */
-#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom) do { } while (0)
+#define share_xen_page_with_guest(p, d, r) do { } while (0)
+#define share_xen_page_with_privileged_guests(p, r) do { } while (0)
 
 extern struct page_info *frame_table;
 extern unsigned long frame_table_size;
index 797ac4d1f5768816e322fb06002972bf06599cb1..038819ba6851bed8fee76cc7f11de200dca942ad 100644 (file)
@@ -23,8 +23,9 @@ int steal_page_for_grant_transfer(
 
 #define gnttab_create_shared_page(d, t, i)                               \
     do {                                                                 \
-        SHARE_PFN_WITH_DOMAIN(                                           \
-            virt_to_page((char *)(t)->shared + ((i) * PAGE_SIZE)), (d)); \
+        share_xen_page_with_guest(                                       \
+            virt_to_page((char *)(t)->shared + ((i) * PAGE_SIZE)),       \
+            (d), XENSHARE_writable);                                     \
         set_gpfn_from_mfn(                                               \
             (virt_to_maddr((t)->shared) >> PAGE_SHIFT) + (i),            \
             INVALID_M2P_ENTRY);                                          \
index 040d919626bb0bbec08092c7ea348a028f2bcbd7..8cfe942e8e202cc882106b76220f22e59ad0305b 100644 (file)
@@ -138,21 +138,12 @@ static inline u32 pickle_domptr(struct domain *domain)
 #define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
 #define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
 
-#define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
-    do {                                                                    \
-        page_set_owner((_pfn), (_dom));                                     \
-        /* The incremented type count is intended to pin to 'writable'. */  \
-        (_pfn)->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;  \
-        wmb(); /* install valid domain ptr before updating refcnt. */       \
-        spin_lock(&(_dom)->page_alloc_lock);                                \
-        /* _dom holds an allocation reference */                            \
-        ASSERT((_pfn)->count_info == 0);                                    \
-        (_pfn)->count_info |= PGC_allocated | 1;                            \
-        if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
-            get_knownalive_domain(_dom);                                    \
-        list_add_tail(&(_pfn)->list, &(_dom)->xenpage_list);                \
-        spin_unlock(&(_dom)->page_alloc_lock);                              \
-    } while ( 0 )
+#define XENSHARE_writable 0
+#define XENSHARE_readonly 1
+extern void share_xen_page_with_guest(
+    struct page_info *page, struct domain *d, int readonly);
+extern void share_xen_page_with_privileged_guests(
+    struct page_info *page, int readonly);
 
 extern struct page_info *frame_table;
 extern unsigned long max_page;